// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#pragma once

#include "core/file_sys/errors.h"
#include "core/file_sys/fssystem/fssystem_bucket_tree.h"
#include "core/file_sys/fssystem/fssystem_bucket_tree_utils.h"
#include "core/file_sys/fssystem/fssystem_pooled_buffer.h"

namespace FileSys {

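// Scans forward from the entry described by param to determine how much of the requested
// range could be serviced by a single continuous physical read. Fragment entries that are
// small enough to merge are folded into neighboring non-fragment entries as long as the
// physical offsets stay contiguous. The number of entries scanned is always reported as the
// skip count; the merged read size is reported only if at least one fragment was folded in.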
template <typename EntryType>
Result BucketTree::ScanContinuousReading(ContinuousReadingInfo* out_info,
                                         const ContinuousReadingParam<EntryType>& param) const {
    static_assert(std::is_trivial_v<ContinuousReadingParam<EntryType>>);

    // Validate our preconditions.
    ASSERT(this->IsInitialized());
    ASSERT(out_info != nullptr);
    ASSERT(m_entry_size == sizeof(EntryType));

    // Reset the output.
    out_info->Reset();

    // If there's nothing to read, we're done.
    R_SUCCEED_IF(param.size == 0);

    // If we're reading a fragment, we're done.
    R_SUCCEED_IF(param.entry.IsFragment());

    // Validate the first entry.
    auto entry = param.entry;
    auto cur_offset = param.offset;
    R_UNLESS(entry.GetVirtualOffset() <= cur_offset, ResultOutOfRange);

    // Create a pooled buffer for our scan.
    PooledBuffer pool(m_node_size, 1);
    char* buffer = nullptr;

    s64 entry_storage_size = m_entry_storage->GetSize();

    // Read the node.
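    // If the whole node fits in the pooled buffer, read the entry set up front so that the
    // per-entry lookups in the loop below can be served from memory; otherwise each next
    // entry is read directly from the entry storage.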
    if (m_node_size <= pool.GetSize()) {
        buffer = pool.GetBuffer();
        const auto ofs = param.entry_set.index * static_cast<s64>(m_node_size);
        R_UNLESS(m_node_size + ofs <= static_cast<size_t>(entry_storage_size),
                 ResultInvalidBucketTreeNodeEntryCount);

        m_entry_storage->Read(reinterpret_cast<u8*>(buffer), m_node_size, ofs);
    }

    // Calculate extents.
    const auto end_offset = cur_offset + static_cast<s64>(param.size);
    s64 phys_offset = entry.GetPhysicalOffset();

    // Start merge tracking.
    s64 merge_size = 0;
    s64 readable_size = 0;
    bool merged = false;

    // Iterate.
    auto entry_index = param.entry_index;
    for (const auto entry_count = param.entry_set.count; entry_index < entry_count; ++entry_index) {
        // If we're past the end, we're done.
        if (end_offset <= cur_offset) {
            break;
        }

        // Validate the entry offset.
        const auto entry_offset = entry.GetVirtualOffset();
        R_UNLESS(entry_offset <= cur_offset, ResultInvalidIndirectEntryOffset);

        // Get the next entry.
        EntryType next_entry = {};
        s64 next_entry_offset;

        if (entry_index + 1 < entry_count) {
            if (buffer != nullptr) {
                const auto ofs = impl::GetBucketTreeEntryOffset(0, m_entry_size, entry_index + 1);
                std::memcpy(std::addressof(next_entry), buffer + ofs, m_entry_size);
            } else {
                const auto ofs = impl::GetBucketTreeEntryOffset(param.entry_set.index, m_node_size,
                                                                m_entry_size, entry_index + 1);
                m_entry_storage->ReadObject(std::addressof(next_entry), ofs);
            }

            next_entry_offset = next_entry.GetVirtualOffset();
            R_UNLESS(param.offsets.IsInclude(next_entry_offset), ResultInvalidIndirectEntryOffset);
        } else {
            next_entry_offset = param.entry_set.offset;
        }

        // Validate the next entry offset.
        R_UNLESS(cur_offset < next_entry_offset, ResultInvalidIndirectEntryOffset);

        // Determine how much data there is.
        const auto data_size = next_entry_offset - cur_offset;
        ASSERT(data_size > 0);

        // Determine how much data we should read.
        const auto remaining_size = end_offset - cur_offset;
        const size_t read_size = static_cast<size_t>(std::min(data_size, remaining_size));
        ASSERT(read_size <= param.size);

        // Update our merge tracking.
        if (entry.IsFragment()) {
            // If we can't merge, stop looping.
            if (EntryType::FragmentSizeMax <= read_size || remaining_size <= data_size) {
                break;
            }

            // Otherwise, add the current size to the merge size.
            merge_size += read_size;
        } else {
            // If we can't merge, stop looping.
            if (phys_offset != entry.GetPhysicalOffset()) {
                break;
            }

            // Add the size to the readable amount.
            readable_size += merge_size + read_size;
            ASSERT(readable_size <= static_cast<s64>(param.size));

            // Update whether we've merged.
            merged |= merge_size > 0;
            merge_size = 0;
        }

        // Advance.
        cur_offset += read_size;
        ASSERT(cur_offset <= end_offset);

        phys_offset += next_entry_offset - entry_offset;
        entry = next_entry;
    }

    // If we merged, set our readable size.
    if (merged) {
        out_info->SetReadSize(static_cast<size_t>(readable_size));
    }
    out_info->SetSkipCount(entry_index - param.entry_index);

    R_SUCCEED();
}

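// Packs the visitor's current state (offset range, entry set header, entry index, and current
// entry) into a ContinuousReadingParam and forwards it to the owning tree's
// ScanContinuousReading.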
template <typename EntryType>
Result BucketTree::Visitor::ScanContinuousReading(ContinuousReadingInfo* out_info, s64 offset,
                                                  size_t size) const {
    static_assert(std::is_trivial_v<EntryType>);
    ASSERT(this->IsValid());

    // Create our parameters.
    ContinuousReadingParam<EntryType> param = {
        .offset = offset,
        .size = size,
        .entry_set = m_entry_set.header,
        .entry_index = m_entry_index,
        .offsets{},
        .entry{},
    };
    std::memcpy(std::addressof(param.offsets), std::addressof(m_offsets),
                sizeof(BucketTree::Offsets));
    std::memcpy(std::addressof(param.entry), m_entry, sizeof(EntryType));

    // Scan.
    R_RETURN(m_tree->ScanContinuousReading<EntryType>(out_info, param));
}

} // namespace FileSys